u32 vmx_vmentry_control __read_mostly;
bool_t cpu_has_vmx_ins_outs_instr_info __read_mostly;
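+/* Per-CPU VMCS used as this CPU's VMXON region. */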
+static DEFINE_PER_CPU(struct vmcs_struct *, host_vmcs);
static DEFINE_PER_CPU(struct vmcs_struct *, current_vmcs);
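+/* VMCSs which are currently loaded (active) on this CPU. */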
+static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
static u32 vmcs_revision_id __read_mostly;
static void __vmx_clear_vmcs(void *info)
{
struct vcpu *v = info;
- __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
- v->arch.hvm_vmx.active_cpu = -1;
- v->arch.hvm_vmx.launched = 0;
- if ( v->arch.hvm_vmx.vmcs == this_cpu(current_vmcs) )
- this_cpu(current_vmcs) = NULL;
+ struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+ /* Otherwise we can nest (vmx_suspend_cpu() vs. vmx_clear_vmcs()). */
+ ASSERT(!local_irq_is_enabled());
+
+ if ( arch_vmx->active_cpu == smp_processor_id() )
+ {
+ __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+ arch_vmx->active_cpu = -1;
+ arch_vmx->launched = 0;
+ list_del(&arch_vmx->active_list);
+ if ( arch_vmx->vmcs == this_cpu(current_vmcs) )
+ this_cpu(current_vmcs) = NULL;
+ }
}
static void vmx_clear_vmcs(struct vcpu *v)
{
int cpu = v->arch.hvm_vmx.active_cpu;
- if ( cpu == -1 )
- return;
-
- if ( cpu == smp_processor_id() )
- return __vmx_clear_vmcs(v);
-
- on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
+ if ( cpu != -1 )
+ on_selected_cpus(cpumask_of_cpu(cpu), __vmx_clear_vmcs, v, 1, 1);
}
static void vmx_load_vmcs(struct vcpu *v)
{
+ unsigned long flags;
+
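+ /* Disable IRQs: the active list is also modified by __vmx_clear_vmcs(). */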
+ local_irq_save(flags);
+
+ if ( v->arch.hvm_vmx.active_cpu == -1 )
+ {
+ list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
+ v->arch.hvm_vmx.active_cpu = smp_processor_id();
+ }
+
+ ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());
+
__vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
- v->arch.hvm_vmx.active_cpu = smp_processor_id();
this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;
+
+ local_irq_restore(flags);
+}
+
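+/*
+ * Clear every VMCS that is active on this CPU, then leave VMX operation
+ * (VMXOFF) so the CPU can safely be suspended or taken offline.
+ */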
+void vmx_suspend_cpu(void)
+{
+ struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ while ( !list_empty(active_vmcs_list) )
+ __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
+ struct vcpu, arch.hvm_vmx.active_list));
+
+ if ( read_cr4() & X86_CR4_VMXE )
+ {
+ __vmxoff();
+ clear_in_cr4(X86_CR4_VMXE);
+ }
+
+ local_irq_restore(flags);
+}
+
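+/* Re-enable CR4.VMXE and re-enter VMX operation (VMXON) when the CPU comes back up. */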
+void vmx_resume_cpu(void)
+{
+ if ( !(read_cr4() & X86_CR4_VMXE) )
+ {
+ set_in_cr4(X86_CR4_VMXE);
+ if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
+ BUG();
+ }
}
void vmx_vmcs_enter(struct vcpu *v)
struct vmcs_struct *vmx_alloc_host_vmcs(void)
{
- return vmx_alloc_vmcs();
+ ASSERT(this_cpu(host_vmcs) == NULL);
+ this_cpu(host_vmcs) = vmx_alloc_vmcs();
+ INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
+ return this_cpu(host_vmcs);
}
void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
{
+ ASSERT(vmcs == this_cpu(host_vmcs));
vmx_free_vmcs(vmcs);
+ this_cpu(host_vmcs) = NULL;
}
struct xgt_desc {
int vmx_create_vmcs(struct vcpu *v)
{
- if ( v->arch.hvm_vmx.vmcs == NULL )
+ struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
+
+ if ( arch_vmx->vmcs == NULL )
{
- if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
+ if ( (arch_vmx->vmcs = vmx_alloc_vmcs()) == NULL )
return -ENOMEM;
- __vmx_clear_vmcs(v);
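+ /* A newly allocated VMCS is not on any CPU's active list; clear it directly. */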
+ INIT_LIST_HEAD(&arch_vmx->active_list);
+ __vmpclear(virt_to_maddr(arch_vmx->vmcs));
+ arch_vmx->active_cpu = -1;
+ arch_vmx->launched = 0;
}
construct_vmcs(v);
vmx_restore_dr(v);
}
-static void stop_vmx(void)
-{
- if ( !(read_cr4() & X86_CR4_VMXE) )
- return;
-
- __vmxoff();
- clear_in_cr4(X86_CR4_VMXE);
-}
-
static void vmx_store_cpu_guest_regs(
struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
{
static struct hvm_function_table vmx_function_table = {
.name = "VMX",
- .disable = stop_vmx,
.domain_initialise = vmx_domain_initialise,
.domain_destroy = vmx_domain_destroy,
.vcpu_initialise = vmx_vcpu_initialise,
.inject_exception = vmx_inject_exception,
.init_ap_context = vmx_init_ap_context,
.init_hypercall_page = vmx_init_hypercall_page,
- .event_injection_faulted = vmx_event_injection_faulted
+ .event_injection_faulted = vmx_event_injection_faulted,
+ .suspend_cpu = vmx_suspend_cpu,
+ .resume_cpu = vmx_resume_cpu,
};
int start_vmx(void)
struct hvm_function_table {
char *name;
- /*
- * Disable HVM functionality
- */
- void (*disable)(void);
-
/*
* Initialise/destroy HVM domain/vcpu resources
*/
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_injection_faulted)(struct vcpu *v);
+
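+ /* Quiesce/restore HVM state on this physical CPU across suspend/resume. */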
+ void (*suspend_cpu)(void);
+ void (*resume_cpu)(void);
};
extern struct hvm_function_table hvm_funcs;
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK (1U << TRAP_machine_check)
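+/* Per-CPU suspend/resume hooks; no-ops for HVM backends that leave them unset. */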
+static inline void hvm_suspend_cpu(void)
+{
+ if ( hvm_funcs.suspend_cpu )
+ hvm_funcs.suspend_cpu();
+}
+
+static inline void hvm_resume_cpu(void)
+{
+ if ( hvm_funcs.resume_cpu )
+ hvm_funcs.resume_cpu();
+}
+
#endif /* __ASM_X86_HVM_HVM_H__ */